From: kfraser@localhost.localdomain Date: Wed, 19 Sep 2007 09:24:24 +0000 (+0100) Subject: hvm: hvm_{load,store}_cpu_guest_regs() does not touch segment X-Git-Tag: archive/raspbian/4.8.0-1+rpi1~1^2~14937^2~68 X-Git-Url: https://dgit.raspbian.org/%22http:/www.example.com/cgi/%22https:/%22bookmarks:///%22http:/www.example.com/cgi/%22https:/%22bookmarks:/?a=commitdiff_plain;h=3b375e9de0061dea8f1eace7eddac2bdfc4e65df;p=xen.git hvm: hvm_{load,store}_cpu_guest_regs() does not touch segment selectors. We have separate accessors for that now. It is now an invariant that guest_cpu_user_regs()->{cs,ds,es,fs,gs,ss} are invalid for an HVM guest. Signed-off-by: Keir Fraser <keir@xensource.com> --- diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c index 24b579d466..f325a79b3c 100644 --- a/xen/arch/x86/domctl.c +++ b/xen/arch/x86/domctl.c @@ -555,18 +555,27 @@ void arch_get_info_guest(struct vcpu *v, vcpu_guest_context_u c) if ( is_hvm_vcpu(v) ) { if ( !is_pv_32on64_domain(v->domain) ) - hvm_store_cpu_guest_regs(v, &c.nat->user_regs, c.nat->ctrlreg); + { + hvm_store_cpu_guest_regs(v, &c.nat->user_regs); + memset(c.nat->ctrlreg, 0, sizeof(c.nat->ctrlreg)); + c.nat->ctrlreg[0] = v->arch.hvm_vcpu.guest_cr[0]; + c.nat->ctrlreg[2] = v->arch.hvm_vcpu.guest_cr[2]; + c.nat->ctrlreg[3] = v->arch.hvm_vcpu.guest_cr[3]; + c.nat->ctrlreg[4] = v->arch.hvm_vcpu.guest_cr[4]; + } #ifdef CONFIG_COMPAT else { struct cpu_user_regs user_regs; - typeof(c.nat->ctrlreg) ctrlreg; unsigned i; - hvm_store_cpu_guest_regs(v, &user_regs, ctrlreg); + hvm_store_cpu_guest_regs(v, &user_regs); XLAT_cpu_user_regs(&c.cmp->user_regs, &user_regs); - for ( i = 0; i < ARRAY_SIZE(c.cmp->ctrlreg); ++i ) - c.cmp->ctrlreg[i] = ctrlreg[i]; + memset(c.cmp->ctrlreg, 0, sizeof(c.cmp->ctrlreg)); + c.cmp->ctrlreg[0] = v->arch.hvm_vcpu.guest_cr[0]; + c.cmp->ctrlreg[2] = v->arch.hvm_vcpu.guest_cr[2]; + c.cmp->ctrlreg[3] = v->arch.hvm_vcpu.guest_cr[3]; + c.cmp->ctrlreg[4] = v->arch.hvm_vcpu.guest_cr[4]; } #endif } diff --git 
a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c index 795c71a727..ad930fb99b 100644 --- a/xen/arch/x86/hvm/hvm.c +++ b/xen/arch/x86/hvm/hvm.c @@ -973,7 +973,7 @@ void hvm_task_switch( goto out; } - hvm_store_cpu_guest_regs(v, regs, NULL); + hvm_store_cpu_guest_regs(v, regs); ptss = hvm_map(prev_tr.base, sizeof(tss)); if ( ptss == NULL ) @@ -1322,7 +1322,7 @@ int hvm_do_hypercall(struct cpu_user_regs *regs) #endif case 4: case 2: - hvm_store_cpu_guest_regs(current, regs, NULL); + hvm_store_cpu_guest_regs(current, regs); if ( unlikely(ring_3(regs)) ) { default: diff --git a/xen/arch/x86/hvm/platform.c b/xen/arch/x86/hvm/platform.c index 3d69e9cca5..db66c9cbe4 100644 --- a/xen/arch/x86/hvm/platform.c +++ b/xen/arch/x86/hvm/platform.c @@ -1032,7 +1032,7 @@ void handle_mmio(unsigned long gpa) /* Copy current guest state into io instruction state structure. */ memcpy(regs, guest_cpu_user_regs(), HVM_CONTEXT_STACK_BYTES); - hvm_store_cpu_guest_regs(v, regs, NULL); + hvm_store_cpu_guest_regs(v, regs); df = regs->eflags & X86_EFLAGS_DF ? 
1 : 0; diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c index 846f5074a8..b165c45dc3 100644 --- a/xen/arch/x86/hvm/svm/svm.c +++ b/xen/arch/x86/hvm/svm/svm.c @@ -109,27 +109,13 @@ static int svm_lme_is_set(struct vcpu *v) } static void svm_store_cpu_guest_regs( - struct vcpu *v, struct cpu_user_regs *regs, unsigned long *crs) + struct vcpu *v, struct cpu_user_regs *regs) { struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb; - if ( regs != NULL ) - { - regs->ss = vmcb->ss.sel; - regs->esp = vmcb->rsp; - regs->eflags = vmcb->rflags; - regs->cs = vmcb->cs.sel; - regs->eip = vmcb->rip; - } - - if ( crs != NULL ) - { - /* Returning the guest's regs */ - crs[0] = v->arch.hvm_vcpu.guest_cr[0]; - crs[2] = v->arch.hvm_vcpu.guest_cr[2]; - crs[3] = v->arch.hvm_vcpu.guest_cr[3]; - crs[4] = v->arch.hvm_vcpu.guest_cr[4]; - } + regs->esp = vmcb->rsp; + regs->eflags = vmcb->rflags; + regs->eip = vmcb->rip; } static enum handler_return long_mode_do_msr_write(struct cpu_user_regs *regs) @@ -702,7 +688,6 @@ static void svm_set_segment_register(struct vcpu *v, enum x86_segment seg, { case x86_seg_cs: memcpy(&vmcb->cs, reg, sizeof(*reg)); - guest_cpu_user_regs()->cs = reg->sel; break; case x86_seg_ds: memcpy(&vmcb->ds, reg, sizeof(*reg)); @@ -722,7 +707,7 @@ static void svm_set_segment_register(struct vcpu *v, enum x86_segment seg, break; case x86_seg_ss: memcpy(&vmcb->ss, reg, sizeof(*reg)); - guest_cpu_user_regs()->ss = reg->sel; + vmcb->cpl = vmcb->ss.attr.fields.dpl; break; case x86_seg_tr: svm_sync_vmcb(v); @@ -829,10 +814,8 @@ static void svm_load_cpu_guest_regs(struct vcpu *v, struct cpu_user_regs *regs) { struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb; - vmcb->ss.sel = regs->ss; vmcb->rsp = regs->esp; vmcb->rflags = regs->eflags | 2UL; - vmcb->cs.sel = regs->cs; vmcb->rip = regs->eip; } @@ -1518,7 +1501,7 @@ static void svm_io_instruction(struct vcpu *v) /* Copy current guest state into io instruction state structure. 
*/ memcpy(regs, guest_cpu_user_regs(), HVM_CONTEXT_STACK_BYTES); - svm_store_cpu_guest_regs(v, regs, NULL); + svm_store_cpu_guest_regs(v, regs); info.bytes = vmcb->exitinfo1; @@ -2292,7 +2275,7 @@ asmlinkage void svm_vmexit_handler(struct cpu_user_regs *regs) case VMEXIT_EXCEPTION_MC: HVMTRACE_0D(MCE, v); - svm_store_cpu_guest_regs(v, regs, NULL); + svm_store_cpu_guest_regs(v, regs); do_machine_check(regs); break; diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c index 3f6d7e0620..eabb369160 100644 --- a/xen/arch/x86/hvm/vmx/vmx.c +++ b/xen/arch/x86/hvm/vmx/vmx.c @@ -794,61 +794,25 @@ static void vmx_ctxt_switch_to(struct vcpu *v) } static void vmx_store_cpu_guest_regs( - struct vcpu *v, struct cpu_user_regs *regs, unsigned long *crs) + struct vcpu *v, struct cpu_user_regs *regs) { vmx_vmcs_enter(v); - if ( regs != NULL ) - { - regs->eflags = __vmread(GUEST_RFLAGS); - regs->ss = __vmread(GUEST_SS_SELECTOR); - regs->cs = __vmread(GUEST_CS_SELECTOR); - regs->eip = __vmread(GUEST_RIP); - regs->esp = __vmread(GUEST_RSP); - } - - if ( crs != NULL ) - { - crs[0] = v->arch.hvm_vcpu.guest_cr[0]; - crs[2] = v->arch.hvm_vcpu.guest_cr[2]; - crs[3] = v->arch.hvm_vcpu.guest_cr[3]; - crs[4] = v->arch.hvm_vcpu.guest_cr[4]; - } + regs->eflags = __vmread(GUEST_RFLAGS); + regs->eip = __vmread(GUEST_RIP); + regs->esp = __vmread(GUEST_RSP); vmx_vmcs_exit(v); } static void vmx_load_cpu_guest_regs(struct vcpu *v, struct cpu_user_regs *regs) { - unsigned long base; - vmx_vmcs_enter(v); - __vmwrite(GUEST_SS_SELECTOR, regs->ss); - __vmwrite(GUEST_RSP, regs->esp); - /* NB. Bit 1 of RFLAGS must be set for VMENTRY to succeed. */ __vmwrite(GUEST_RFLAGS, regs->eflags | 2UL); - - if ( regs->eflags & EF_VM ) - { - /* - * The VMX spec (section 4.3.1.2, Checks on Guest Segment - * Registers) says that virtual-8086 mode guests' segment - * base-address fields in the VMCS must be equal to their - * corresponding segment selector field shifted right by - * four bits upon vmentry. 
- */ - base = __vmread(GUEST_CS_BASE); - if ( (regs->cs << 4) != base ) - __vmwrite(GUEST_CS_BASE, regs->cs << 4); - base = __vmread(GUEST_SS_BASE); - if ( (regs->ss << 4) != base ) - __vmwrite(GUEST_SS_BASE, regs->ss << 4); - } - - __vmwrite(GUEST_CS_SELECTOR, regs->cs); __vmwrite(GUEST_RIP, regs->eip); + __vmwrite(GUEST_RSP, regs->esp); vmx_vmcs_exit(v); } @@ -978,7 +942,6 @@ static void vmx_set_segment_register(struct vcpu *v, enum x86_segment seg, __vmwrite(GUEST_CS_LIMIT, reg->limit); __vmwrite(GUEST_CS_BASE, reg->base); __vmwrite(GUEST_CS_AR_BYTES, attr); - guest_cpu_user_regs()->cs = reg->sel; break; case x86_seg_ds: __vmwrite(GUEST_DS_SELECTOR, reg->sel); @@ -1009,7 +972,6 @@ static void vmx_set_segment_register(struct vcpu *v, enum x86_segment seg, __vmwrite(GUEST_SS_LIMIT, reg->limit); __vmwrite(GUEST_SS_BASE, reg->base); __vmwrite(GUEST_SS_AR_BYTES, attr); - guest_cpu_user_regs()->ss = reg->sel; break; case x86_seg_tr: __vmwrite(GUEST_TR_SELECTOR, reg->sel); @@ -1890,7 +1852,7 @@ static void vmx_io_instruction(unsigned long exit_qualification, /* Copy current guest state into io instruction state structure. */ memcpy(regs, guest_cpu_user_regs(), HVM_CONTEXT_STACK_BYTES); - vmx_store_cpu_guest_regs(current, regs, NULL); + vmx_store_cpu_guest_regs(current, regs); HVM_DBG_LOG(DBG_LEVEL_IO, "vm86 %d, eip=%x:%lx, " "exit_qualification = %lx", @@ -2639,7 +2601,7 @@ static void vmx_failed_vmentry(unsigned int exit_reason, case EXIT_REASON_MACHINE_CHECK: printk("caused by machine check.\n"); HVMTRACE_0D(MCE, current); - vmx_store_cpu_guest_regs(current, regs, NULL); + vmx_store_cpu_guest_regs(current, regs); do_machine_check(regs); break; default: @@ -2761,12 +2723,12 @@ asmlinkage void vmx_vmexit_handler(struct cpu_user_regs *regs) (X86_EVENTTYPE_NMI << 8) ) goto exit_and_crash; HVMTRACE_0D(NMI, v); - vmx_store_cpu_guest_regs(v, regs, NULL); + vmx_store_cpu_guest_regs(v, regs); do_nmi(regs); /* Real NMI, vector 2: normal processing. 
*/ break; case TRAP_machine_check: HVMTRACE_0D(MCE, v); - vmx_store_cpu_guest_regs(v, regs, NULL); + vmx_store_cpu_guest_regs(v, regs); do_machine_check(regs); break; default: diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c index fb0ef1949b..82c54c5207 100644 --- a/xen/arch/x86/mm/shadow/multi.c +++ b/xen/arch/x86/mm/shadow/multi.c @@ -2929,7 +2929,7 @@ static int sh_page_fault(struct vcpu *v, goto done; } - hvm_store_cpu_guest_regs(v, regs, NULL); + hvm_store_cpu_guest_regs(v, regs); } SHADOW_PRINTK("emulate: eip=%#lx esp=%#lx\n", diff --git a/xen/arch/x86/oprofile/op_model_athlon.c b/xen/arch/x86/oprofile/op_model_athlon.c index adcc9209f7..d3c757a57e 100644 --- a/xen/arch/x86/oprofile/op_model_athlon.c +++ b/xen/arch/x86/oprofile/op_model_athlon.c @@ -119,7 +119,7 @@ static int athlon_check_ctrs(unsigned int const cpu, (regs->eip == (unsigned long)svm_stgi_label)) { /* SVM guest was running when NMI occurred */ ASSERT(is_hvm_vcpu(v)); - hvm_store_cpu_guest_regs(v, guest_regs, NULL); + hvm_store_cpu_guest_regs(v, guest_regs); eip = guest_regs->eip; mode = xenoprofile_get_mode(v, guest_regs); } else { diff --git a/xen/arch/x86/x86_32/traps.c b/xen/arch/x86/x86_32/traps.c index 813283b285..dcee52f65f 100644 --- a/xen/arch/x86/x86_32/traps.c +++ b/xen/arch/x86/x86_32/traps.c @@ -41,11 +41,29 @@ void show_registers(struct cpu_user_regs *regs) struct cpu_user_regs fault_regs = *regs; unsigned long fault_crs[8]; const char *context; + struct vcpu *v = current; - if ( is_hvm_vcpu(current) && guest_mode(regs) ) + if ( is_hvm_vcpu(v) && guest_mode(regs) ) { + struct segment_register sreg; context = "hvm"; - hvm_store_cpu_guest_regs(current, &fault_regs, fault_crs); + hvm_store_cpu_guest_regs(v, &fault_regs); + fault_crs[0] = v->arch.hvm_vcpu.guest_cr[0]; + fault_crs[2] = v->arch.hvm_vcpu.guest_cr[2]; + fault_crs[3] = v->arch.hvm_vcpu.guest_cr[3]; + fault_crs[4] = v->arch.hvm_vcpu.guest_cr[4]; + hvm_get_segment_register(v, x86_seg_cs, &sreg); + 
fault_regs.cs = sreg.sel; + hvm_get_segment_register(v, x86_seg_ds, &sreg); + fault_regs.ds = sreg.sel; + hvm_get_segment_register(v, x86_seg_es, &sreg); + fault_regs.es = sreg.sel; + hvm_get_segment_register(v, x86_seg_fs, &sreg); + fault_regs.fs = sreg.sel; + hvm_get_segment_register(v, x86_seg_gs, &sreg); + fault_regs.gs = sreg.sel; + hvm_get_segment_register(v, x86_seg_ss, &sreg); + fault_regs.ss = sreg.sel; } else { @@ -63,7 +81,7 @@ void show_registers(struct cpu_user_regs *regs) else { context = "guest"; - fault_crs[2] = current->vcpu_info->arch.cr2; + fault_crs[2] = v->vcpu_info->arch.cr2; } fault_crs[0] = read_cr0(); diff --git a/xen/arch/x86/x86_64/traps.c b/xen/arch/x86/x86_64/traps.c index d1ceabcd6d..ed48005da3 100644 --- a/xen/arch/x86/x86_64/traps.c +++ b/xen/arch/x86/x86_64/traps.c @@ -44,18 +44,36 @@ void show_registers(struct cpu_user_regs *regs) struct cpu_user_regs fault_regs = *regs; unsigned long fault_crs[8]; const char *context; + struct vcpu *v = current; - if ( is_hvm_vcpu(current) && guest_mode(regs) ) + if ( is_hvm_vcpu(v) && guest_mode(regs) ) { + struct segment_register sreg; context = "hvm"; - hvm_store_cpu_guest_regs(current, &fault_regs, fault_crs); + hvm_store_cpu_guest_regs(v, &fault_regs); + fault_crs[0] = v->arch.hvm_vcpu.guest_cr[0]; + fault_crs[2] = v->arch.hvm_vcpu.guest_cr[2]; + fault_crs[3] = v->arch.hvm_vcpu.guest_cr[3]; + fault_crs[4] = v->arch.hvm_vcpu.guest_cr[4]; + hvm_get_segment_register(v, x86_seg_cs, &sreg); + fault_regs.cs = sreg.sel; + hvm_get_segment_register(v, x86_seg_ds, &sreg); + fault_regs.ds = sreg.sel; + hvm_get_segment_register(v, x86_seg_es, &sreg); + fault_regs.es = sreg.sel; + hvm_get_segment_register(v, x86_seg_fs, &sreg); + fault_regs.fs = sreg.sel; + hvm_get_segment_register(v, x86_seg_gs, &sreg); + fault_regs.gs = sreg.sel; + hvm_get_segment_register(v, x86_seg_ss, &sreg); + fault_regs.ss = sreg.sel; } else { if ( guest_mode(regs) ) { context = "guest"; - fault_crs[2] = arch_get_cr2(current); + 
fault_crs[2] = arch_get_cr2(v); } else { diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h index 1d195dabf2..475e374ee6 100644 --- a/xen/include/asm-x86/hvm/hvm.h +++ b/xen/include/asm-x86/hvm/hvm.h @@ -85,7 +85,7 @@ struct hvm_function_table { * 2) modify guest state (e.g., set debug flags). */ void (*store_cpu_guest_regs)( - struct vcpu *v, struct cpu_user_regs *r, unsigned long *crs); + struct vcpu *v, struct cpu_user_regs *r); void (*load_cpu_guest_regs)( struct vcpu *v, struct cpu_user_regs *r); @@ -168,9 +168,9 @@ void hvm_send_assist_req(struct vcpu *v); static inline void hvm_store_cpu_guest_regs( - struct vcpu *v, struct cpu_user_regs *r, unsigned long *crs) + struct vcpu *v, struct cpu_user_regs *r) { - hvm_funcs.store_cpu_guest_regs(v, r, crs); + hvm_funcs.store_cpu_guest_regs(v, r); } static inline void